with its requirement.
For now we trap and emulate each of the guest's rdtsc instructions; this
may be optimized later.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
return TRAP_double_fault;
}
+/*
+ * Enable rdtsc exiting on every vcpu of @d so that guest rdtsc traps to
+ * the hypervisor.  No-op when softtsc emulation is in use, or when the
+ * underlying implementation provides no enable_rdtsc_exiting hook.
+ */
+void hvm_enable_rdtsc_exiting(struct domain *d)
+{
+ struct vcpu *v;
+
+ if ( opt_softtsc || !hvm_funcs.enable_rdtsc_exiting )
+ return;
+
+ for_each_vcpu ( d, v )
+ hvm_funcs.enable_rdtsc_exiting(v);
+}
+
+/*
+ * Decide whether the guest's preferred TSC frequency differs from the
+ * host's (compared at MHz granularity) and hence needs scaling.  With
+ * softtsc the host frequency is the emulated 1000MHz.  Caches the
+ * decision in d->arch.hvm_domain.tsc_scaled and returns it (non-zero
+ * means scaling is required).  A gtsc_mhz of 0 means "no preference"
+ * and never requires scaling.
+ */
+int hvm_gtsc_need_scale(struct domain *d)
+{
+ uint32_t gtsc_mhz, htsc_mhz;
+
+ gtsc_mhz = d->arch.hvm_domain.gtsc_khz / 1000;
+ htsc_mhz = opt_softtsc ? 1000 : ((uint32_t)cpu_khz / 1000);
+
+ d->arch.hvm_domain.tsc_scaled = (gtsc_mhz && (gtsc_mhz != htsc_mhz));
+ return d->arch.hvm_domain.tsc_scaled;
+}
+
+/*
+ * Scale a host TSC value to the guest's TSC frequency.  Returns
+ * host_tsc unchanged when this domain needs no scaling (tsc_scaled
+ * clear).  With softtsc the host frequency is the emulated 1000000kHz.
+ */
+static u64 hvm_h2g_scale_tsc(struct vcpu *v, u64 host_tsc)
+{
+ uint32_t gtsc_khz, htsc_khz;
+
+ if ( !v->domain->arch.hvm_domain.tsc_scaled )
+ return host_tsc;
+
+ htsc_khz = opt_softtsc ? 1000000 : cpu_khz;
+ gtsc_khz = v->domain->arch.hvm_domain.gtsc_khz;
+ return muldiv64(host_tsc, gtsc_khz, htsc_khz);
+}
+
 void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc)
 {
- u64 host_tsc;
+ uint64_t host_tsc, scaled_htsc;
- rdtscll(host_tsc);
+ /* Sample the current reference TSC: emulated time under softtsc,
+ * the hardware TSC otherwise. */
+ if ( opt_softtsc )
+ host_tsc = hvm_get_guest_time(v);
+ else
+ rdtscll(host_tsc);
+
+ /* Convert the sample to the guest's frequency before computing the
+ * offset, so it matches what hvm_get_guest_tsc() will read back. */
+ scaled_htsc = hvm_h2g_scale_tsc(v, host_tsc);
- v->arch.hvm_vcpu.cache_tsc_offset = guest_tsc - host_tsc;
+ v->arch.hvm_vcpu.cache_tsc_offset = guest_tsc - scaled_htsc;
 hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
 }
 u64 hvm_get_guest_tsc(struct vcpu *v)
 {
- u64 host_tsc;
+ uint64_t host_tsc, scaled_htsc;
 if ( opt_softtsc )
 host_tsc = hvm_get_guest_time(v);
 else
 rdtscll(host_tsc);
- return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
+ /* Scale the reference TSC to the guest's frequency, then apply the
+ * cached offset established by hvm_set_guest_tsc(). */
+ scaled_htsc = hvm_h2g_scale_tsc(v, host_tsc);
+
+ return scaled_htsc + v->arch.hvm_vcpu.cache_tsc_offset;
 }
void hvm_migrate_timers(struct vcpu *v)
/* Restore guest's preferred TSC frequency. */
d->arch.hvm_domain.gtsc_khz = hdr->gtsc_khz;
+ if ( hdr->gtsc_khz && hvm_gtsc_need_scale(d) )
+ {
+ hvm_enable_rdtsc_exiting(d);
+ gdprintk(XENLOG_WARNING, "Loading VM(id:%d) expects freq: %dmHz, "
+ "but host's freq :%"PRIu64"mHz, trap and emulate rdtsc!!!\n",
+ d->domain_id, hdr->gtsc_khz / 1000, opt_softtsc ? 1000 :
+ cpu_khz / 1000);
+ }
+
/* VGA state is not saved/restored, so we nobble the cache. */
d->arch.hvm_domain.stdvga.cache = 0;
vmx_vmcs_exit(v);
}
+/*
+ * VMX implementation of the enable_rdtsc_exiting hook: turn on the
+ * "RDTSC exiting" processor-based execution control so that a guest
+ * rdtsc causes a VM exit.
+ */
+static void vmx_enable_rdtsc_exiting(struct vcpu *v)
+{
+ vmx_vmcs_enter(v);
+ v->arch.hvm_vmx.exec_control |= CPU_BASED_RDTSC_EXITING;
+ __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+ vmx_vmcs_exit(v);
+}
+
void do_nmi(struct cpu_user_regs *);
static void vmx_init_hypercall_page(struct domain *d, void *hypercall_page)
.msr_write_intercept = vmx_msr_write_intercept,
.invlpg_intercept = vmx_invlpg_intercept,
.set_uc_mode = vmx_set_uc_mode,
- .set_info_guest = vmx_set_info_guest
+ .set_info_guest = vmx_set_info_guest,
+ .enable_rdtsc_exiting = vmx_enable_rdtsc_exiting
};
static unsigned long *vpid_bitmap;
pl->stime_offset = -(u64)get_s_time();
pl->last_guest_time = 0;
- d->arch.hvm_domain.gtsc_khz = cpu_khz;
+ d->arch.hvm_domain.gtsc_khz = opt_softtsc ? 1000000 : cpu_khz;
+ d->arch.hvm_domain.tsc_scaled = 0;
}
u64 hvm_get_guest_time(struct vcpu *v)
struct hvm_ioreq_page buf_ioreq;
uint32_t gtsc_khz; /* kHz */
- uint32_t pad0;
+ bool_t tsc_scaled;
struct pl_time pl_time;
struct hvm_io_handler io_handler;
void (*invlpg_intercept)(unsigned long vaddr);
void (*set_uc_mode)(struct vcpu *v);
void (*set_info_guest)(struct vcpu *v);
+ void (*enable_rdtsc_exiting)(struct vcpu *v);
};
extern struct hvm_function_table hvm_funcs;
uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2);
+void hvm_enable_rdtsc_exiting(struct domain *d);
+int hvm_gtsc_need_scale(struct domain *d);
+
static inline int hvm_cpu_up(void)
{
if ( hvm_funcs.cpu_up )